# ! pip install fastcore
# ! pip install --upgrade fastai
Requirement already satisfied: fastcore in /usr/local/lib/python3.7/dist-packages (1.3.27) Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from fastcore) (21.3) Requirement already satisfied: pip in /usr/local/lib/python3.7/dist-packages (from fastcore) (21.1.3) Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->fastcore) (3.0.6)
import os
import cv2
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook
from sklearn.model_selection import train_test_split
from fastcore.xtras import Path
from fastai.callback.hook import summary
from fastai.callback.progress import ProgressCallback
from fastai.callback.schedule import lr_find, fit_flat_cos
from fastai.data.block import DataBlock
from fastai.data.external import untar_data, URLs
from fastai.data.transforms import get_image_files, FuncSplitter, Normalize
from fastai.layers import Mish
from fastai.losses import BaseLoss
from fastai.optimizer import ranger
from fastai.torch_core import tensor
from fastai.vision.augment import aug_transforms
from fastai.vision.core import PILImage, PILMask
from fastai.vision.data import ImageBlock, MaskBlock, imagenet_stats
from fastai.vision.learner import unet_learner
from PIL import Image
import numpy as np
from torch import nn
import torchvision.models as tvm
from torchvision.transforms import *
import torch
import torch.nn.functional as F
from fastai.losses import *
from fastai.vision.all import *
from fastai.metrics import JaccardCoeff, Dice
#from torchgeometry.losses.tversky import tversky_loss, TverskyLoss
#from kornia.losses import TverskyLoss
import glob
def FileSplitter(valid_fnames):
    "Return a splitter that routes any item whose filename is in `valid_fnames` to the validation set."
    def _is_valid(item):
        return item.name in valid_fnames

    def _inner(items, **kwargs):
        # Delegate the actual index bookkeeping to fastai's FuncSplitter.
        return FuncSplitter(_is_valid)(items)

    return _inner
def fg_accuracy(inp, targ):
    "Pixel accuracy restricted to foreground (non-zero) target pixels."
    labels = targ.squeeze(1)          # drop the channel dim of the mask
    preds = inp.argmax(dim=1)         # per-pixel predicted class
    foreground = labels != 0
    correct = preds[foreground] == labels[foreground]
    return correct.float().mean()
def bg_accuracy(inp, targ):
    "Pixel accuracy restricted to background (zero) target pixels."
    labels = targ.squeeze(1)          # drop the channel dim of the mask
    preds = inp.argmax(dim=1)         # per-pixel predicted class
    background = labels == 0
    correct = preds[background] == labels[background]
    return correct.float().mean()
class IOU(AvgMetric):
    "Per-class Intersection over Union, accumulated across validation batches."

    def __init__(self, class_index, class_label, axis, ignore_index=-1):
        store_attr('axis,class_index,class_label,ignore_index')

    def accumulate(self, learn):
        # `total` holds the running intersection, `count` the running union,
        # so AvgMetric's total/count ratio yields IoU for this class.
        pred = learn.pred.argmax(dim=self.axis)
        targ = learn.y
        hit = ((pred == targ) & (targ == self.class_index)).sum().item()
        union = (((pred == self.class_index) | (targ == self.class_index))
                 & (targ != self.ignore_index)).sum().item()
        if union:
            self.total += hit
        self.count += union

    @property
    def name(self):
        # Report the human-readable class label in metric tables.
        return self.class_label
class CombinedLoss:
    "Segmentation loss: focal loss plus `alpha`-weighted Dice loss."

    def __init__(self, axis=1, smooth=1., alpha=1.):
        store_attr()
        self.focal_loss = FocalLossFlat(axis=axis)
        self.dice_loss = DiceLoss(axis, smooth)

    def __call__(self, pred, targ):
        focal = self.focal_loss(pred, targ)
        dice = self.dice_loss(pred, targ)
        return focal + self.alpha * dice

    def decodes(self, x):
        "Map raw logits to hard class predictions along the class axis."
        return x.argmax(dim=self.axis)

    def activation(self, x):
        "Probabilities over the class axis (used by fastai for display/preds)."
        return F.softmax(x, dim=self.axis)
@delegates()
class TstLearner(Learner):
    "Bare-bones stand-in Learner: skips Learner.__init__ and only carries the pred/xb/yb slots metrics read."
    def __init__(self, dls=None, model=None, **kwargs):
        self.pred = None
        self.xb = None
        self.yb = None
#Go through a fake cycle with various batch sizes and computes the value of met
def compute_val(met, x1, x2):
    "Run metric `met` over three uneven fake batches sliced from `x1`/`x2` and return its value."
    met.reset()
    # Batch boundaries: slices 0:6, 6:15, 15:20 — exercises varying batch sizes.
    bounds = [0, 6, 15, 20]
    fake_learn = TstLearner()
    for lo, hi in zip(bounds[:-1], bounds[1:]):
        fake_learn.pred = x1[lo:hi]
        fake_learn.yb = (x2[lo:hi],)
        met.accumulate(fake_learn)
    return met.value
def plot_test_results(dls_tst, fnames, idx):
    """Plot input image, ground-truth mask, and predicted mask for `fnames[idx]`.

    Parameters
    ----------
    dls_tst : fastai DataLoaders used to build a one-item labelled test dataloader.
    fnames : sequence of image paths to sample from. (Bug fix: the original body
        ignored this parameter and read the module-level `tst_fnames` instead.)
    idx : integer index into `fnames` of the sample to display.

    NOTE(review): still relies on the module-level `learn` for predictions —
    consider passing the learner in explicitly.
    """
    dl_sample = dls_tst.test_dl(test_items=[fnames[idx]], with_labels=True)
    plt.figure(figsize=(20, 5))
    preds, targs = learn.get_preds(dl=dl_sample)
    pred_1 = preds[0]
    img = PILImage.create(fnames[idx])
    plt.subplot(131)
    plt.imshow(img)
    plt.subplot(132)
    plt.imshow(targs[0])
    # argmax over the class axis turns logits into a displayable label map.
    pred_arx = pred_1.argmax(dim=0)
    plt.subplot(133)
    plt.imshow(pred_arx)
# --- Dataset paths (Google Drive layout for the BDD binary-lane sample) ---
base_pth = '/content/drive/MyDrive/binary_lane_bdd'
img_pth = os.path.join(base_pth, 'images_sample/100k_sample/train')
lbl_pth = os.path.join(base_pth, 'lane_sample/masks/train')
binary_lbl_pth = os.path.join(base_pth, 'lane_binary_sample/masks/train')
tst_pth = os.path.join(base_pth, 'Images_here')
tst_binary_lbl_pth = os.path.join(base_pth, 'Labels Binary_here')
# Filename stems (extensions stripped) for images, masks, and test files.
img_files = [i.replace('.jpg','') for i in os.listdir(img_pth)]
msk_files = [i.replace('.png','') for i in os.listdir(binary_lbl_pth)]
tst_files = [i.replace('.jpg','') for i in os.listdir(tst_pth)]
# Keep only test stems that do not overlap the training images.
tst_files = list(set(tst_files) - set(img_files))
# NOTE(review): this overwrites the mask stems computed above with the test
# stems — looks unintentional; msk_files is not used again below. Verify.
msk_files = tst_files
# 80/20 train/validation split over the training image filenames.
# NOTE(review): `[1:]` drops one directory entry — os.listdir order is
# arbitrary, so this discards an effectively random file; confirm intent.
file_df = pd.DataFrame(os.listdir(img_pth)[1:], columns = ['filename'])
train_df, val_df = train_test_split(file_df, test_size = 0.2, random_state = 42)
valid_fnames = list(val_df['filename'].values)
tst_fnames = [i+'.jpg' for i in tst_files]
train_df.shape
(5599, 1)
val_df.shape
(1400, 1)
# Collect full image/mask paths with fastai's recursive image finder.
fnames = get_image_files(img_pth)
lbl_names = get_image_files(binary_lbl_pth)
# NOTE(review): overwrites the '.jpg' name list built above with Path objects;
# later code treats tst_fnames as paths, so this is the live value. Verify.
tst_fnames = get_image_files(tst_pth)
tst_lbl_names = get_image_files(tst_binary_lbl_pth)
# Sanity check: display the first training image.
img_fn = fnames[0]
img = PILImage.create(img_fn)
img.show(figsize=(5,5))
<matplotlib.axes._subplots.AxesSubplot at 0x7f0900a2e210>
def get_msk(o):
    "Map a training image path to its binary lane-mask path (images dir -> masks dir, .jpg -> .png)."
    return Path(o.as_posix().replace('images_sample/100k_sample','lane_binary_sample/masks').replace('.jpg','.png'))

def get_msk_tst(o):
    "Map a test image path to its binary label path (.jpg -> .png)."
    return Path(o.as_posix().replace('Images_here','Labels Binary_here').replace('.jpg','.png'))
# Calculate the Class Weight
# Count per-class pixel frequencies on a small sample of masks and derive
# inverse-frequency weights: weight_c = total_pixels / (2 * count_c).
master_out = []
for img_fn in tqdm_notebook(fnames[:2]):  # only the first 2 masks are sampled
    msk = PILMask.create(get_msk(img_fn))
    #msk.show(figsize=(5,5), alpha=1)
    out1, out2 = np.unique(msk, return_counts=True)
    master_out.append(out2)
class_weight_df = pd.DataFrame(master_out, columns = ['cnt_0', 'cnt_1'])
# 720*1280 is the full-resolution BDD mask size (see `sz` below).
class_weight_df['weight_0'] = (720*1280)/(2*class_weight_df['cnt_0'])
class_weight_df['weight_1'] = (720*1280)/(2*class_weight_df['cnt_1'])
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:2: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0 Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`
# Binary segmentation codes: '0' = background, '1' = lane.
codes = ['0','1']
# Native mask resolution (relies on `msk` left over from the loop above).
sz = msk.shape; sz
(720, 1280)
# For Resizing Image
half = tuple(int(x/2) for x in sz);     # (360, 640) — training resolution
quarter = tuple(int(x/4) for x in sz);  # (180, 320)
quarter
(180, 320)
# Define Train Data Loader
# DataBlock: image + mask pairs, split off the validation filenames, labels
# looked up via get_msk, heavy augmentation at half resolution, then
# ImageNet-statistics normalisation (required by the pretrained encoder).
bdd = DataBlock(blocks=(ImageBlock, MaskBlock(codes)),
get_items=get_image_files,
splitter=FileSplitter(valid_fnames),
get_y=get_msk,
batch_tfms=[*aug_transforms(mult=1.0,
do_flip=True,
flip_vert=False,
max_rotate=20.0,
min_zoom=0.75,
max_zoom=1.25,
max_lighting=0.7,
max_warp=0.2,
p_affine=0.75,
p_lighting=0.75,
xtra_tfms=[GaussianBlur(kernel_size=5)],
size=half,
mode='bilinear',
pad_mode='reflection',
align_corners=True,
batch=False,
min_scale=1.0),
Normalize.from_stats(*imagenet_stats)])
dls = bdd.dataloaders(img_pth, bs=8)
/usr/local/lib/python3.7/dist-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). ret = func(*args, **kwargs)
# Define Test Data Loader
# Mirrors the training DataBlock but over the held-out test folder, splitting
# on tst_fnames and mapping labels with get_msk_tst.
# NOTE(review): augmentations are also applied here — usually undesirable for
# evaluation; confirm this is intended.
bdd_tst = DataBlock(blocks=(ImageBlock, MaskBlock(codes)),
get_items=get_image_files,
splitter=FileSplitter(tst_fnames),
get_y=get_msk_tst,
batch_tfms=[*aug_transforms(mult=1.0,
do_flip=True,
flip_vert=False,
max_rotate=20.0,
min_zoom=0.75,
max_zoom=1.25,
max_lighting=0.7,
max_warp=0.2,
p_affine=0.75,
p_lighting=0.75,
xtra_tfms=[GaussianBlur(kernel_size=5)],
size=half,
mode='bilinear',
pad_mode='reflection',
align_corners=True,
batch=False,
min_scale=1.0),
Normalize.from_stats(*imagenet_stats)])
dls_tst = bdd_tst.dataloaders(tst_pth, bs=8)
/usr/local/lib/python3.7/dist-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). ret = func(*args, **kwargs)
# Visual sanity checks on one batch from each loader.
dls.show_batch(max_n=2, vmin=0, vmax=1, figsize=(14,10))
/usr/local/lib/python3.7/dist-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). ret = func(*args, **kwargs)
dls_tst.show_batch(max_n=4, vmin=0, vmax=1, figsize=(14,10))
/usr/local/lib/python3.7/dist-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). ret = func(*args, **kwargs)
# Attach class codes so plots and metrics can label classes.
dls.vocab = codes
dls_tst.vocab = codes
name2id = {v:k for k,v in enumerate(codes)}
opt = ranger
# NOTE(review): class weights are defined but never passed to the loss below —
# verify whether they were meant to be used.
weights = torch.Tensor([0.5, 100])
# Per-class IoU metrics; axis=1 is the class/channel axis of the logits.
bg_iou = IOU(class_index = 0, class_label = 'Not-Lane', axis=1)
fg_iou = IOU(class_index = 1, class_label = 'Lane', axis=1)
# U-Net with a pretrained ResNet-34 encoder, self-attention, Mish activations,
# the ranger optimizer, and the Focal+Dice combined loss (dice weight 0.01).
learn = unet_learner(dls,
tvm.resnet34,
metrics=[fg_accuracy, bg_accuracy, fg_iou, bg_iou, JaccardCoeff(), Dice()],
self_attention=True,
act_cls=Mish,
opt_func=opt,
pretrained=True,
loss_func=CombinedLoss(axis=1, smooth=1, alpha = 0.01))
/usr/local/lib/python3.7/dist-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). ret = func(*args, **kwargs) Downloading: "https://download.pytorch.org/models/resnet34-b627a593.pth" to /root/.cache/torch/hub/checkpoints/resnet34-b627a593.pth
learn.summary()
/usr/local/lib/python3.7/dist-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). ret = func(*args, **kwargs)
DynamicUnet (Input shape: 8 x 3 x 360 x 640)
============================================================================
Layer (type) Output Shape Param # Trainable
============================================================================
8 x 64 x 180 x 320
Conv2d 9408 False
BatchNorm2d 128 True
ReLU
____________________________________________________________________________
8 x 64 x 90 x 160
MaxPool2d
Conv2d 36864 False
BatchNorm2d 128 True
ReLU
Conv2d 36864 False
BatchNorm2d 128 True
Conv2d 36864 False
BatchNorm2d 128 True
ReLU
Conv2d 36864 False
BatchNorm2d 128 True
Conv2d 36864 False
BatchNorm2d 128 True
ReLU
Conv2d 36864 False
BatchNorm2d 128 True
____________________________________________________________________________
8 x 128 x 45 x 80
Conv2d 73728 False
BatchNorm2d 256 True
ReLU
Conv2d 147456 False
BatchNorm2d 256 True
Conv2d 8192 False
BatchNorm2d 256 True
Conv2d 147456 False
BatchNorm2d 256 True
ReLU
Conv2d 147456 False
BatchNorm2d 256 True
Conv2d 147456 False
BatchNorm2d 256 True
ReLU
Conv2d 147456 False
BatchNorm2d 256 True
Conv2d 147456 False
BatchNorm2d 256 True
ReLU
Conv2d 147456 False
BatchNorm2d 256 True
____________________________________________________________________________
8 x 256 x 23 x 40
Conv2d 294912 False
BatchNorm2d 512 True
ReLU
Conv2d 589824 False
BatchNorm2d 512 True
Conv2d 32768 False
BatchNorm2d 512 True
Conv2d 589824 False
BatchNorm2d 512 True
ReLU
Conv2d 589824 False
BatchNorm2d 512 True
Conv2d 589824 False
BatchNorm2d 512 True
ReLU
Conv2d 589824 False
BatchNorm2d 512 True
Conv2d 589824 False
BatchNorm2d 512 True
ReLU
Conv2d 589824 False
BatchNorm2d 512 True
Conv2d 589824 False
BatchNorm2d 512 True
ReLU
Conv2d 589824 False
BatchNorm2d 512 True
Conv2d 589824 False
BatchNorm2d 512 True
ReLU
Conv2d 589824 False
BatchNorm2d 512 True
____________________________________________________________________________
8 x 512 x 12 x 20
Conv2d 1179648 False
BatchNorm2d 1024 True
ReLU
Conv2d 2359296 False
BatchNorm2d 1024 True
Conv2d 131072 False
BatchNorm2d 1024 True
Conv2d 2359296 False
BatchNorm2d 1024 True
ReLU
Conv2d 2359296 False
BatchNorm2d 1024 True
Conv2d 2359296 False
BatchNorm2d 1024 True
ReLU
Conv2d 2359296 False
BatchNorm2d 1024 True
BatchNorm2d 1024 True
ReLU
____________________________________________________________________________
8 x 1024 x 12 x 20
Conv2d 4719616 True
Mish
____________________________________________________________________________
8 x 512 x 12 x 20
Conv2d 4719104 True
Mish
____________________________________________________________________________
8 x 1024 x 12 x 20
Conv2d 525312 True
Mish
____________________________________________________________________________
8 x 256 x 24 x 40
PixelShuffle
BatchNorm2d 512 True
Conv2d 2359808 True
Mish
Conv2d 2359808 True
Mish
Mish
____________________________________________________________________________
8 x 1024 x 23 x 40
Conv2d 525312 True
Mish
____________________________________________________________________________
8 x 256 x 46 x 80
PixelShuffle
BatchNorm2d 256 True
Conv2d 1327488 True
Mish
Conv2d 1327488 True
Mish
____________________________________________________________________________
8 x 48 x 3600
Conv1d 18432 True
Conv1d 18432 True
Conv1d 147456 True
Mish
____________________________________________________________________________
8 x 768 x 45 x 80
Conv2d 295680 True
Mish
____________________________________________________________________________
8 x 192 x 90 x 160
PixelShuffle
BatchNorm2d 128 True
Conv2d 590080 True
Mish
Conv2d 590080 True
Mish
Mish
____________________________________________________________________________
8 x 512 x 90 x 160
Conv2d 131584 True
Mish
____________________________________________________________________________
8 x 128 x 180 x 320
PixelShuffle
BatchNorm2d 128 True
____________________________________________________________________________
8 x 96 x 180 x 320
Conv2d 165984 True
Mish
Conv2d 83040 True
Mish
Mish
____________________________________________________________________________
8 x 384 x 180 x 320
Conv2d 37248 True
Mish
____________________________________________________________________________
8 x 96 x 360 x 640
PixelShuffle
ResizeToOrig
____________________________________________________________________________
8 x 99 x 360 x 640
MergeLayer
Conv2d 88308 True
Mish
Conv2d 88308 True
Sequential
Mish
____________________________________________________________________________
8 x 2 x 360 x 640
Conv2d 200 True
ToTensorBase
____________________________________________________________________________
Total params: 41,405,488
Total trainable params: 20,137,840
Total non-trainable params: 21,267,648
Optimizer used: <function ranger at 0x7f0a99f0ff80>
Loss function: <__main__.CombinedLoss object at 0x7f0a937b3290>
Model frozen up to parameter group #2
Callbacks:
- TrainEvalCallback
- Recorder
- ProgressCallback
# LR finder suggested ~7.6e-5, but 1e-3 is used below.
# NOTE(review): confirm the deliberate override of the suggested LR.
learn.lr_find()
/usr/local/lib/python3.7/dist-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). ret = func(*args, **kwargs)
SuggestedLRs(valley=7.585775892948732e-05)
lr = 1e-3
# Flat-then-cosine schedule, up to 100 epochs, early-stopped after 10 epochs
# without validation-loss improvement.
learn.fit_flat_cos(100, slice(lr),
cbs=[
ShowGraphCallback(),
EarlyStoppingCallback(monitor='valid_loss', comp=None, min_delta=0.0, patience=10, reset_on_fit=True)
])
| epoch | train_loss | valid_loss | fg_accuracy | bg_accuracy | Lane | Not-Lane | jaccard_coeff | dice | time |
|---|---|---|---|---|---|---|---|---|---|
| 0 | 0.079782 | 0.077297 | 0.331704 | 0.995397 | 0.196628 | 0.990966 | 0.196628 | 0.328637 | 06:48 |
| 1 | 0.076285 | 0.073079 | 0.363984 | 0.995662 | 0.220829 | 0.991465 | 0.220829 | 0.361769 | 06:09 |
| 2 | 0.075650 | 0.071891 | 0.398412 | 0.995074 | 0.229749 | 0.991105 | 0.229749 | 0.373652 | 06:09 |
| 3 | 0.074405 | 0.071284 | 0.442927 | 0.994390 | 0.240878 | 0.990718 | 0.240878 | 0.388238 | 06:09 |
| 4 | 0.072471 | 0.070306 | 0.422155 | 0.994979 | 0.241536 | 0.991164 | 0.241536 | 0.389093 | 06:09 |
| 5 | 0.072441 | 0.069743 | 0.445764 | 0.994564 | 0.246248 | 0.990930 | 0.246248 | 0.395183 | 06:09 |
| 6 | 0.071724 | 0.068843 | 0.441417 | 0.994716 | 0.247044 | 0.991025 | 0.247044 | 0.396207 | 06:08 |
| 7 | 0.071885 | 0.069038 | 0.444364 | 0.994779 | 0.249502 | 0.991123 | 0.249502 | 0.399362 | 06:08 |
| 8 | 0.071666 | 0.068612 | 0.427337 | 0.995138 | 0.247272 | 0.991363 | 0.247272 | 0.396500 | 06:08 |
| 9 | 0.071397 | 0.069435 | 0.418902 | 0.995187 | 0.243826 | 0.991367 | 0.243826 | 0.392058 | 06:08 |
| 10 | 0.070334 | 0.068384 | 0.425330 | 0.995152 | 0.246635 | 0.991379 | 0.246635 | 0.395681 | 06:08 |
| 11 | 0.070207 | 0.067642 | 0.438746 | 0.995125 | 0.253716 | 0.991424 | 0.253716 | 0.404743 | 06:08 |
| 12 | 0.070279 | 0.068215 | 0.440456 | 0.994929 | 0.251070 | 0.991215 | 0.251070 | 0.401368 | 06:08 |
| 13 | 0.070733 | 0.067913 | 0.447435 | 0.994833 | 0.252518 | 0.991181 | 0.252518 | 0.403216 | 06:08 |
| 14 | 0.069877 | 0.067809 | 0.457887 | 0.994593 | 0.253844 | 0.991003 | 0.253844 | 0.404905 | 06:08 |
| 15 | 0.070296 | 0.067913 | 0.486409 | 0.993871 | 0.254813 | 0.990479 | 0.254813 | 0.406137 | 06:08 |
| 16 | 0.069692 | 0.067575 | 0.425357 | 0.995378 | 0.251405 | 0.991585 | 0.251405 | 0.401796 | 06:08 |
| 17 | 0.070062 | 0.067760 | 0.450225 | 0.994898 | 0.255734 | 0.991282 | 0.255734 | 0.407306 | 06:08 |
| 18 | 0.069064 | 0.067776 | 0.452832 | 0.994728 | 0.253224 | 0.991121 | 0.253224 | 0.404116 | 06:08 |
| 19 | 0.070150 | 0.067413 | 0.435252 | 0.995332 | 0.256864 | 0.991601 | 0.256864 | 0.408738 | 06:08 |
| 20 | 0.068939 | 0.067501 | 0.431462 | 0.995343 | 0.254397 | 0.991611 | 0.254397 | 0.405608 | 06:08 |
| 21 | 0.069764 | 0.067054 | 0.454562 | 0.994909 | 0.258306 | 0.991316 | 0.258306 | 0.410562 | 06:08 |
| 22 | 0.069343 | 0.067568 | 0.427449 | 0.995387 | 0.252764 | 0.991595 | 0.252764 | 0.403531 | 06:08 |
| 23 | 0.068644 | 0.067077 | 0.437344 | 0.995216 | 0.255543 | 0.991521 | 0.255543 | 0.407064 | 06:07 |
| 24 | 0.069337 | 0.067689 | 0.462224 | 0.994763 | 0.259449 | 0.991211 | 0.259449 | 0.412003 | 06:08 |
| 25 | 0.067813 | 0.067008 | 0.450582 | 0.994985 | 0.257831 | 0.991364 | 0.257831 | 0.409961 | 06:07 |
| 26 | 0.068892 | 0.066741 | 0.463081 | 0.994821 | 0.260792 | 0.991276 | 0.260792 | 0.413695 | 06:08 |
| 27 | 0.068404 | 0.067320 | 0.465975 | 0.994573 | 0.258258 | 0.991031 | 0.258258 | 0.410501 | 06:08 |
| 28 | 0.068546 | 0.067109 | 0.470545 | 0.994422 | 0.256544 | 0.990933 | 0.256544 | 0.408332 | 06:08 |
| 29 | 0.067677 | 0.066949 | 0.446581 | 0.995131 | 0.258482 | 0.991487 | 0.258482 | 0.410784 | 06:08 |
| 30 | 0.068290 | 0.067617 | 0.463883 | 0.994749 | 0.259640 | 0.991236 | 0.259640 | 0.412244 | 06:08 |
| 31 | 0.067487 | 0.067042 | 0.458271 | 0.994940 | 0.261738 | 0.991355 | 0.261738 | 0.414884 | 06:08 |
| 32 | 0.067344 | 0.066837 | 0.452452 | 0.995014 | 0.259112 | 0.991428 | 0.259112 | 0.411579 | 06:08 |
| 33 | 0.067717 | 0.067319 | 0.453502 | 0.994770 | 0.254732 | 0.991153 | 0.254732 | 0.406033 | 06:08 |
| 34 | 0.068004 | 0.066063 | 0.422400 | 0.995667 | 0.256328 | 0.991851 | 0.256328 | 0.408059 | 06:08 |
| 35 | 0.067874 | 0.066344 | 0.462225 | 0.994875 | 0.261404 | 0.991341 | 0.261404 | 0.414465 | 06:08 |
| 36 | 0.067676 | 0.066928 | 0.474829 | 0.994492 | 0.260263 | 0.991028 | 0.260263 | 0.413030 | 06:09 |
| 37 | 0.069547 | 0.067361 | 0.466288 | 0.994606 | 0.258198 | 0.991093 | 0.258198 | 0.410426 | 06:08 |
| 38 | 0.069359 | 0.066862 | 0.448744 | 0.995145 | 0.260454 | 0.991503 | 0.260454 | 0.413270 | 06:08 |
| 39 | 0.068537 | 0.066311 | 0.475395 | 0.994560 | 0.262764 | 0.991117 | 0.262764 | 0.416173 | 06:08 |
| 40 | 0.067514 | 0.066879 | 0.448548 | 0.995099 | 0.258728 | 0.991467 | 0.258728 | 0.411095 | 06:08 |
| 41 | 0.068239 | 0.066970 | 0.431842 | 0.995480 | 0.257866 | 0.991736 | 0.257866 | 0.410006 | 06:08 |
| 42 | 0.068777 | 0.066966 | 0.471025 | 0.994625 | 0.262051 | 0.991137 | 0.262051 | 0.415278 | 06:08 |
| 43 | 0.068686 | 0.066956 | 0.454428 | 0.995022 | 0.260889 | 0.991422 | 0.260889 | 0.413817 | 06:08 |
| 44 | 0.065749 | 0.065922 | 0.466639 | 0.994784 | 0.262846 | 0.991263 | 0.262846 | 0.416276 | 06:08 |
| 45 | 0.068253 | 0.066679 | 0.444204 | 0.995197 | 0.258981 | 0.991546 | 0.258981 | 0.411414 | 06:08 |
| 46 | 0.067907 | 0.066211 | 0.447041 | 0.995185 | 0.260058 | 0.991551 | 0.260058 | 0.412771 | 06:08 |
| 47 | 0.068368 | 0.067035 | 0.430355 | 0.995408 | 0.255441 | 0.991655 | 0.255441 | 0.406935 | 06:08 |
| 48 | 0.068218 | 0.066512 | 0.458801 | 0.994957 | 0.262459 | 0.991377 | 0.262459 | 0.415790 | 06:08 |
| 49 | 0.067952 | 0.066904 | 0.444987 | 0.995146 | 0.258187 | 0.991491 | 0.258187 | 0.410412 | 06:08 |
| 50 | 0.068403 | 0.066333 | 0.475053 | 0.994563 | 0.261990 | 0.991101 | 0.261990 | 0.415201 | 06:08 |
| 51 | 0.066860 | 0.066450 | 0.495040 | 0.994145 | 0.264726 | 0.990807 | 0.264726 | 0.418630 | 06:08 |
| 52 | 0.069478 | 0.066801 | 0.475144 | 0.994579 | 0.262609 | 0.991127 | 0.262609 | 0.415978 | 06:08 |
| 53 | 0.068324 | 0.066504 | 0.461926 | 0.994846 | 0.260675 | 0.991295 | 0.260675 | 0.413549 | 06:08 |
| 54 | 0.067516 | 0.066768 | 0.460322 | 0.994824 | 0.259897 | 0.991257 | 0.259897 | 0.412568 | 06:07 |
/usr/local/lib/python3.7/dist-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). ret = func(*args, **kwargs)
No improvement since epoch 44: early stopping
learn.show_results(max_n=8, figsize=(24,48))
/usr/local/lib/python3.7/dist-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). ret = func(*args, **kwargs)
# Checkpoint bookkeeping: keep a handle to the learner and reload model_6.
idx = 6
log_dict = {}
log_dict['learn'] = learn
#base_pth
'/content/drive/MyDrive/binary_lane_bdd'
#learn.save(base_pth + '/results/model_{}'.format(idx))
Path('/content/drive/MyDrive/binary_lane_bdd/results/model_6.pth')
learn.load(base_pth+'/results/model_6')
<fastai.learner.Learner at 0x7f0a937fd910>
learn.show_results(max_n=8, figsize=(24,48))
/usr/local/lib/python3.7/dist-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). ret = func(*args, **kwargs)
# Labelled dataloader over every test image for aggregate metrics.
dl = dls_tst.test_dl(test_items = tst_fnames, with_labels=True)
dl.show_batch(max_n= 6, figsize = (24,48))
/usr/local/lib/python3.7/dist-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). ret = func(*args, **kwargs)
# NOTE(review): `preds`/`targs` are not defined anywhere in this script —
# presumably a `preds, targs = learn.get_preds(dl=dl)` cell is missing. Verify.
print('Jaccard Coefficient : {}'.format(compute_val(JaccardCoeff(), cast(preds, TensorImage), cast(targs, TensorMask))))
print('Dice : {}'.format(compute_val(Dice(), cast(preds, TensorImage), cast(targs, TensorMask))))
print('FG Accuracy : {}'.format(fg_accuracy(preds, targs)))
print('BG Accuracy : {}'.format(bg_accuracy(preds, targs)))
Jaccard Coefficient : 0.2674524121048411 Dice : 0.42203148544360175 FG Accuracy : 0.3162895739078522 BG Accuracy : 0.9973511099815369
# Per-image metrics over the test set, sorted worst-first by IoU so the
# weakest predictions can be inspected (e.g. idx=186 below).
test_results = []
# NOTE(review): 501 is hard-coded — confirm it matches len(preds)/len(tst_fnames).
for idx in tqdm_notebook(range(501)):
    # Reshape to a batch of one (1, 2, 360, 640) logits / (1, 360, 640) mask.
    jcf = compute_val(JaccardCoeff(), cast(preds[idx].reshape(1,2,360,640), TensorImage), cast(targs[idx].reshape(1,360,640), TensorMask))
    dice = compute_val(Dice(), cast(preds[idx].reshape(1,2,360,640), TensorImage), cast(targs[idx].reshape(1,360,640), TensorMask))
    fga = fg_accuracy(preds[idx].reshape(1,2,360,640), targs[idx].reshape(1,360,640))
    bga = bg_accuracy(preds[idx].reshape(1,2,360,640), targs[idx].reshape(1,360,640))
    test_results.append([idx, tst_fnames[idx], jcf, dice, fga, bga])
test_results = pd.DataFrame(test_results, columns = ['index', 'path', 'iou', 'dice', 'fg_acc', 'bg_acc'])
test_results = test_results.sort_values(by=['iou'])
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:2: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0 Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`
plot_test_results(dls_tst = dls_tst, fnames = tst_fnames, idx=186)
/usr/local/lib/python3.7/dist-packages/torch/_tensor.py:1051: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). ret = func(*args, **kwargs)